movq EDOMAIN_vcpu_info(%rbx),%rax
pushq VCPUINFO_upcall_mask(%rax)
testb $TBF_INTERRUPT,%cl
- setnz VCPUINFO_upcall_mask(%eax)# TBF_INTERRUPT -> clear upcall mask
+ setnz VCPUINFO_upcall_mask(%rax)# TBF_INTERRUPT -> clear upcall mask
popq %rax
shll $16,%eax # Bits 16-23: saved_upcall_mask
- movw UREGS_cs+8(%esp),%ax # Bits 0-15: CS
+ movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
FLT5: movq %rax,8(%rsi) # CS/saved_upcall_mask
movq UREGS_rip+8(%rsp),%rax
FLT6: movq %rax,(%rsi) # RIP
struct domain;
/*
 * Bytes reserved at the bottom of each per-CPU hypervisor stack: the saved
 * guest register frame plus the per-stack domain pointer.
 * NOTE(review): the -/+ lines below are patch residue (old vs. new form).
 * The new form adds 8 bytes of padding so the reserved total is a multiple
 * of 16, keeping the stack pointer 16-byte aligned — see the "must be a
 * multiple of 16" comment accompanying get_stack_bottom later in this file.
 */
#define STACK_RESERVED \
- (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
+ (sizeof(struct cpu_user_regs) + sizeof(struct domain *) + 8)
static inline struct exec_domain *get_current(void)
{
/*
* Get the bottom-of-stack, as stored in the per-CPU TSS. This is actually
- * 40 bytes before the real bottom of the stack to allow space for:
- * domain pointer, DS, ES, FS, GS
+ * 48 bytes before the real bottom of the stack to allow space for:
+ * domain pointer, padding, DS, ES, FS, GS. The padding is required to
+ * have the stack pointer 16-byte aligned: the amount we subtract from
+ * STACK_SIZE *must* be a multiple of 16.
*/
/*
 * get_stack_bottom(): compute the address just above the reserved area at
 * the bottom of the current CPU's hypervisor stack.  It masks %rsp down to
 * the STACK_SIZE-aligned stack base ("andq"), then adds STACK_SIZE minus
 * the reserved byte count ("addq").
 * NOTE(review): the -/+ lines below are patch residue — the subtracted
 * reserve is bumped from 40 to 48 so the amount taken off STACK_SIZE is a
 * multiple of 16 and the stack pointer stays 16-byte aligned (matches the
 * extra padding added to STACK_RESERVED in this same patch).
 */
static inline unsigned long get_stack_bottom(void)
{
unsigned long p;
/* Constraint "0": input shares the register of output operand %0 (p starts
 * as the ~(STACK_SIZE-1) mask); "i": compile-time immediate constant. */
__asm__( "andq %%rsp,%0; addq %2,%0"
: "=r" (p)
- : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-40) );
+ : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-48) );
return p;
}